In this project, I will set up an image processing pipeline, including:
import numpy as np
import cv2
import glob
from pathlib import Path
import matplotlib.pyplot as plt
import pickle
%matplotlib inline
# Project root; switch to the commented-out path when running on Windows.
ROOT_PATH = '/home/downloads/carnd-p4-advanced-lane-lines'
#ROOT_PATH = 'C:/Users/jiangtao.fu/Downloads/carnd-p4-advanced-lane-lines'
# Directory containing the chessboard calibration images.
CAMERA_CAL_PATH = (Path(ROOT_PATH)/'camera_cal').as_posix()
# Directory containing the road test images.
TEST_IMG_PATH = (Path(ROOT_PATH)/'test_images').as_posix()
# Pickle file where the camera matrix / distortion coefficients are stored.
CAMERA_CAL_FILE = (Path(ROOT_PATH)/'calibration.p').as_posix()
def calibrate_camera(img_path, nx=6, ny=9):
    """Compute camera matrix and distortion coefficients from chessboard images.

    Args:
        img_path: directory containing ``calibration*.jpg`` chessboard images.
        nx, ny: number of inner chessboard corners along the two axes
            (must match what ``findChessboardCorners`` is asked to find).

    Returns:
        (mtx, dist): camera matrix and distortion coefficients.

    Raises:
        ValueError: if no chessboard corners were found in any image.
    """
    img_size = None
    # Prepare object points like (0,0,0), (1,0,0), ..., (nx-1, ny-1, 0).
    objp = np.zeros((nx*ny, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane
    for fname in Path(img_path).glob('calibration*.jpg'):
        img = cv2.imread(fname.as_posix())
        if img_size is None:
            # Bug fix: cv2.calibrateCamera expects (width, height);
            # previously this was (height, width).
            img_size = (img.shape[1], img.shape[0])
            print('img_size', img_size)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners.
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if ret:
            objpoints.append(objp)
            imgpoints.append(corners)
    if not objpoints:
        raise ValueError('no chessboard corners found in %s' % img_path)
    # Calibrate from all collected point correspondences.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
    return mtx, dist
# Run the calibration once and persist the result for later cells.
mtx, dist = calibrate_camera(CAMERA_CAL_PATH, nx=6, ny=9)
camera_calibration = {'mtx': mtx, 'dist':dist}
with open((Path(ROOT_PATH)/'calibration.p').as_posix(), 'wb') as f:
    pickle.dump(camera_calibration, f)
def undist_img(img, cal_file):
    """Undistort an image using a pickled camera calibration.

    Args:
        img: image array to undistort.
        cal_file: path of a pickle containing {'mtx': ..., 'dist': ...}.

    Returns:
        The undistorted image.
    """
    with open(cal_file, 'rb') as f:
        calibration = pickle.load(f)
    mtx = calibration['mtx']
    dist = calibration['dist']
    # (an unused img_size computation was removed)
    return cv2.undistort(img, mtx, dist, None, mtx)
def plot_undist(img_path, cal_file):
    """Show an image side by side with its undistorted version.

    Args:
        img_path: path of the image to load.
        cal_file: pickle file with the camera calibration ('mtx', 'dist').
    """
    img_path = Path(img_path).as_posix()
    # Load the stored camera calibration.
    with open(cal_file, 'rb') as f:
        calibration = pickle.load(f)
    mtx = calibration['mtx']
    dist = calibration['dist']
    img = cv2.imread(img_path)
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    # cv2 loads BGR; convert both images to RGB for matplotlib.
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    dst = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
    # Visualize the undistortion.
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12,5))
    ax1.imshow(img_rgb)
    ax1.set_title('Original Image', fontsize=20)
    ax2.imshow(dst)
    ax2.set_title('Undistorted Image', fontsize=20)
    plt.show()
# Sanity-check the calibration on one chessboard image (%time is an IPython magic).
%time plot_undist((Path(ROOT_PATH)/'camera_cal/calibration1.jpg').as_posix(), CAMERA_CAL_FILE)
Applying undistortion to all test images
# Undistort every test image and show before/after side by side.
for test_file in Path(TEST_IMG_PATH).glob('test*.jpg'):
    fname = test_file.as_posix()
    print(fname)
    plot_undist(fname, CAMERA_CAL_FILE)
Take one test image for a quick check
# Pick one test image for the quick experiments below.
#test_image_path = list(TEST_IMG_PATH.glob('*.jpg'))[0].as_posix()
test_image_path = (Path(ROOT_PATH)/'test_images/test4.jpg').as_posix()
test_image_rgb = plt.imread(test_image_path)  # RGB, since loaded via matplotlib
def plot_test_image(img_rgb, img_bin, image_names=('Original Image', 'Thresholded Gradient')):
    """Show a color image next to its binary/thresholded counterpart.

    Args:
        img_rgb: the original RGB image.
        img_bin: the binary mask to display in grayscale.
        image_names: pair of subplot titles. The default is now a tuple —
            a mutable list default was an accident waiting to happen.
    """
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
    f.tight_layout()
    ax1.imshow(img_rgb)
    ax1.set_title(image_names[0], fontsize=20)
    ax2.imshow(img_bin, cmap='gray')
    ax2.set_title(image_names[1], fontsize=20)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    plt.show()
Sobel operator
def abs_sobel_thresh(img_path, orient='x', sobel_kernel=3, thresh=(20, 100)):
    """Binary mask of the scaled absolute Sobel gradient along one axis.

    Args:
        img_path: path of the image to load (BGR via cv2).
        orient: 'x' or 'y' — gradient direction.
        sobel_kernel: Sobel aperture size (bug fix: previously ignored).
        thresh: inclusive (min, max) range kept in the mask.

    Returns:
        uint8 array with 1 where the scaled gradient lies inside thresh.

    Raises:
        ValueError: if orient is neither 'x' nor 'y' (previously this
            caused a NameError further down).
    """
    thresh_min, thresh_max = thresh
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if orient == 'x':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    elif orient == 'y':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    else:
        raise ValueError("orient must be 'x' or 'y', got %r" % (orient,))
    abs_sobel = np.absolute(sobel)
    # Scale to 0-255 so the thresholds are resolution independent.
    scaled_sobel = np.uint8(255*abs_sobel / np.max(abs_sobel))
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    return binary_output
# Gradient thresholding along x and then y on the test image.
grad_binary = abs_sobel_thresh(test_image_path, orient='x', thresh=(30, 100))
plot_test_image(test_image_rgb, grad_binary)
grad_binary = abs_sobel_thresh(test_image_path, orient='y', thresh=(30, 100))
plot_test_image(test_image_rgb, grad_binary)
Gradient magnitude
def mag_thresh(img_path, sobel_kernel=3, mag_thresh=(30, 100)):
    """Binary mask of the scaled gradient-magnitude image.

    Args:
        img_path: path of the image to load (BGR via cv2).
        sobel_kernel: Sobel aperture size (bug fix: previously ignored).
        mag_thresh: inclusive (min, max) range kept in the mask.
            (The parameter shadows the function name; kept for
            backward compatibility with existing callers.)

    Returns:
        uint8 array with 1 where the scaled magnitude lies inside mag_thresh.
    """
    thresh_min, thresh_max = mag_thresh
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    gradmag = np.sqrt(sobelx**2 + sobely**2)
    # Scale to 0-255 (a redundant second uint8 cast was removed).
    scaled_sobel = np.uint8(255*gradmag / np.max(gradmag))
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    return binary_output
# Gradient-magnitude thresholding on the test image.
mag_binary = mag_thresh(test_image_path, sobel_kernel=5, mag_thresh=(30, 100))
plot_test_image(test_image_rgb, mag_binary, image_names=['Original Image', 'Gradient Magnitude'])
Gradient Direction Threshold
def dir_threshold(img_path, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Binary mask of pixels whose gradient direction lies inside thresh.

    The direction is arctan(|dy| / |dx|), so it ranges over [0, pi/2].
    """
    gray = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2GRAY)
    # x and y gradients with the requested aperture size.
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Absolute gradient direction per pixel.
    direction = np.arctan2(np.absolute(gy), np.absolute(gx))
    lo, hi = thresh
    binary_output = np.zeros_like(direction)
    binary_output[(direction >= lo) & (direction <= hi)] = 1
    return binary_output
# Bug fix: this previously called mag_thresh with direction-range values
# (0.5, 1.1), which are meaningless as gradient magnitudes; the intent of
# this cell is the gradient-direction threshold.
dir_binary = dir_threshold(test_image_path, sobel_kernel=9, thresh=(0.5, 1.1))
plot_test_image(test_image_rgb, dir_binary, image_names=['Original Image', 'Gradient Direction'])
# Switch to a harder image to probe the color spaces.
#test_image_path = list(TEST_IMG_PATH.glob('*.jpg'))[0].as_posix()
#test_image_path = (Path(TEST_IMG_PATH)/'hard_challenge01.jpg').as_posix()
test_image_path = (Path(TEST_IMG_PATH)/'challenge03.jpg').as_posix()
#test_image_path = (Path(TEST_IMG_PATH)/'test4.jpg').as_posix()
test_image_rgb = plt.imread(test_image_path)
plt.imshow(test_image_rgb)
# Inspect the individual RGB channels.
R = test_image_rgb[:, :, 0]
G = test_image_rgb[:, :, 1]
B = test_image_rgb[:, :, 2]
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 9))
f.tight_layout()
ax1.imshow(R, cmap='gray')
ax1.set_title('RGB: R', fontsize=30)
ax2.imshow(G, cmap='gray')
ax2.set_title('RGB: G', fontsize=30)
ax3.imshow(B, cmap='gray')
ax3.set_title('RGB: B', fontsize=30)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Inspect the HLS channels.
test_image_hls = cv2.cvtColor(test_image_rgb, cv2.COLOR_RGB2HLS)
H = test_image_hls[:, :, 0]
L = test_image_hls[:, :, 1]
S = test_image_hls[:, :, 2]
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 9))
f.tight_layout()
ax1.imshow(H, cmap='gray')
ax1.set_title('HLS: H', fontsize=30)
ax2.imshow(L, cmap='gray')
ax2.set_title('HLS: L', fontsize=30)
ax3.imshow(S, cmap='gray')
ax3.set_title('HLS: S', fontsize=30)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Threshold the HLS L channel to pick out bright lane paint.
thresh = (200, 255)
l_bin = np.zeros_like(L)
l_bin[(L>thresh[0])&(L<=thresh[1])] = 1
plt.imshow(l_bin, cmap='gray')
# Inspect the HSV channels (bug fix: the subplot titles previously said
# 'HLS' although this cell works in HSV).
test_image_hsv = cv2.cvtColor(test_image_rgb, cv2.COLOR_RGB2HSV)
H = test_image_hsv[:, :, 0]
S = test_image_hsv[:, :, 1]
V = test_image_hsv[:, :, 2]
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 9))
f.tight_layout()
ax1.imshow(H, cmap='gray')
ax1.set_title('HSV: H', fontsize=30)
ax2.imshow(S, cmap='gray')
ax2.set_title('HSV: S', fontsize=30)
ax3.imshow(V, cmap='gray')
ax3.set_title('HSV: V', fontsize=30)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Select near-white pixels: any hue, low saturation, high value.
sensitivity = 50
lower_white = np.array([0,0,255-sensitivity])
upper_white = np.array([255,sensitivity,255])
hsv_bin = cv2.inRange(test_image_hsv, lower_white, upper_white)
plt.imshow(hsv_bin, cmap='gray')
# Inspect the LAB channels; the B (yellow-blue) channel responds strongly
# to yellow lane lines.
test_image_lab = cv2.cvtColor(test_image_rgb, cv2.COLOR_RGB2LAB)
L = test_image_lab[:, :, 0]
A = test_image_lab[:, :, 1]
B = test_image_lab[:, :, 2]
L_eq = cv2.equalizeHist(L)
# Bug fix: B_eq previously equalized the L channel instead of B, so the
# equalized-B threshold below was actually thresholding equalized L.
B_eq = cv2.equalizeHist(B)
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 9))
f.tight_layout()
ax1.imshow(L, cmap='gray')
ax1.set_title('LAB: L', fontsize=30)
ax2.imshow(A, cmap='gray')
ax2.set_title('LAB: A', fontsize=30)
ax3.imshow(B, cmap='gray')
ax3.set_title('LAB: B', fontsize=30)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Threshold the raw L channel.
thresh = (220, 255)
l_bin = np.zeros_like(L)
l_bin[(L>thresh[0])&(L<=thresh[1])] = 1
plt.imshow(l_bin, cmap='gray')
# Threshold the equalized L channel.
thresh = (250, 255)
l_bin = np.zeros_like(L_eq)
l_bin[(L_eq>thresh[0])&(L_eq<=thresh[1])] = 1
plt.imshow(l_bin, cmap='gray')
# Threshold the raw B channel (yellow lines).
thresh = (160, 255)
b_bin = np.zeros_like(B)
b_bin[(B>=thresh[0])&(B<=thresh[1])] = 1
plt.imshow(b_bin, cmap='gray')
# Threshold the equalized B channel.
thresh = (160, 255)
b_bin = np.zeros_like(B_eq)
b_bin[(B_eq>=thresh[0])&(B_eq<=thresh[1])] = 1
plt.imshow(b_bin, cmap='gray')
#adapted from https://medium.com/@tjosh.owoyemi/finding-lane-lines-with-colour-thresholds-beb542e0d839
def convert_to_bin(img, thresh):
    """Return a mask of img's dtype: 1 inside [thresh[0], thresh[1]], else 0."""
    in_range = (img >= thresh[0]) & (img <= thresh[1])
    return in_range.astype(img.dtype)
def select_hls_color(rgb,
                     rgb_w_thresh=([100, 100, 200], [255, 255, 255]),
                     hls_w_thresh=([20, 200, 0], [255, 255, 255]),
                     hls_y_thresh=([10, 0, 100], [40, 255, 255])):
    """Binary lane mask combining white (RGB + HLS) and yellow (HLS) selections.

    Adapted from
    https://medium.com/@tjosh.owoyemi/finding-lane-lines-with-colour-thresholds-beb542e0d839

    Args:
        rgb: RGB image.
        rgb_w_thresh: (low, high) RGB bounds for white.
        hls_w_thresh: (low, high) HLS bounds for white.
        hls_y_thresh: (low, high) HLS bounds for yellow.

    Returns:
        Mask with 1 where any of the three selections fire.
    """
    hls = cv2.cvtColor(rgb, cv2.COLOR_RGB2HLS)
    bin_thresh = (20, 255)

    def _masked_gray_bin(src, low, high, hls_input):
        # Keep pixels inside [low, high], convert to gray, then binarize.
        mask = cv2.inRange(src, np.uint8(low), np.uint8(high))
        sel = cv2.bitwise_and(src, src, mask=mask).astype(np.uint8)
        if hls_input:
            sel = cv2.cvtColor(sel, cv2.COLOR_HLS2RGB)
        gray = cv2.cvtColor(sel, cv2.COLOR_RGB2GRAY)
        return convert_to_bin(gray, bin_thresh)

    # Thresholding for white in RGB.
    rgb_w_bin = _masked_gray_bin(rgb, rgb_w_thresh[0], rgb_w_thresh[1], hls_input=False)
    # Thresholding for white in HLS.
    # Bug fix: the upper bound previously used hls_y_thresh[1] (the yellow
    # bounds) instead of hls_w_thresh[1].
    hls_w_bin = _masked_gray_bin(hls, hls_w_thresh[0], hls_w_thresh[1], hls_input=True)
    # Thresholding for yellow in HLS.
    hls_y_bin = _masked_gray_bin(hls, hls_y_thresh[0], hls_y_thresh[1], hls_input=True)

    combined_bin = np.zeros_like(hls_y_bin)
    combined_bin[(rgb_w_bin==1)|(hls_y_bin==1)|(hls_w_bin==1)] = 1
    return combined_bin
# Apply the combined RGB/HLS color selection and inspect the mask.
color_bin = select_hls_color(test_image_rgb)
plt.imshow(color_bin, cmap='gray')
def select_color(img, l_thresh=(220, 255), b_thresh=(200, 255)):
    """Binary lane mask from the LAB b (yellow) channel.

    Args:
        img: RGB image.
        l_thresh: kept for interface compatibility; the L-channel mask
            does not currently contribute to the output.
        b_thresh: (low, high] threshold on the (rescaled) b channel.

    Returns:
        Float mask with 1 where the b threshold fires.
    """
    lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    l = lab[:, :, 0]
    b = lab[:, :, 2]
    l = l*255./np.max(l)
    # NOTE(review): an L-channel mask used to be computed here but never
    # contributed to the result; the dead computation was removed.
    # Rescale b only when it has a strong maximum — presumably to avoid
    # amplifying frames without yellow content; TODO confirm.
    if np.max(b) >= 175:
        b = b*255./np.max(b)
    b_bin = np.zeros_like(b)
    b_bin[(b>b_thresh[0])&(b<=b_thresh[1])] = 1
    combined_bin = np.zeros_like(l)
    combined_bin[(b_bin==1)] = 1
    return combined_bin
def select_gradient_color_old(img, sx_thresh=(20, 100), l_thresh=(220, 255), b_thresh = (200, 255)):
    """Deprecated: superseded by select_gradient_color.

    Stacks an x-gradient mask (G channel) and a LAB b-channel color mask
    (B channel) into an RGB debug image; values are 0 or 255.
    """
    # Convert to LAB and normalize the lightness channel to 0-255.
    lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    #rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    l = lab[:,:,0]
    l = l*255./np.max(l)
    #l_channel = lab[:,:,0]
    # Sobel x
    sobelx = cv2.Sobel(l, cv2.CV_64F, 1, 0) # Take the derivative in x
    abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    # Threshold x gradient, additionally gated by the lightness range.
    sxbinary = np.zeros_like(scaled_sobel)
    #sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 255
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1]) & (l > l_thresh[0]) & (l <= l_thresh[1])] = 255
    # Threshold color channel via the LAB-based helper (0/255 mask).
    s_binary = select_color(img, b_thresh=b_thresh)*255
    # Stack each channel: R empty, G = gradient mask, B = color mask.
    color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary, s_binary))
    return color_binary
def select_gradient_color(img, sx_thresh=(90, 100), l_thresh=(170, 255), b_thresh = (200, 255)):
    """Stack b-channel color and x-gradient masks into an RGB debug image.

    Returns an image whose G channel holds the gradient mask (0/255) and
    whose B channel holds the color mask (0/255); R is left empty.
    """
    lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    # Normalize the lightness channel to the full 0-255 range.
    l_chan = lab[:, :, 0]
    l_chan = l_chan * 255. / np.max(l_chan)
    b_chan = lab[:, :, 2]
    if np.max(b_chan) >= 175:
        b_chan = b_chan * 255. / np.max(b_chan)
    b_bin = np.zeros_like(b_chan)
    b_bin[(b_chan > b_thresh[0]) & (b_chan <= b_thresh[1])] = 255
    # Scaled absolute x-gradient of the lightness channel.
    grad = np.absolute(cv2.Sobel(l_chan, cv2.CV_64F, 1, 0))
    grad_scaled = np.uint8(255 * grad / np.max(grad))
    # Gradient mask, additionally gated by the lightness range.
    sx_bin = np.zeros_like(grad_scaled)
    keep = ((grad_scaled >= sx_thresh[0]) & (grad_scaled <= sx_thresh[1])
            & (l_chan > l_thresh[0]) & (l_chan <= l_thresh[1]))
    sx_bin[keep] = 255
    return np.dstack((np.zeros_like(sx_bin), sx_bin, b_bin))
def plot_binary_images(color_bin):
    """Show the stacked-channel debug image next to the combined binary mask."""
    # A pixel counts when either the gradient (G) or color (B) plane fired.
    combined_bin = np.zeros_like(color_bin[:, :, 0])
    combined_bin[(color_bin[:, :, 1] / 255 == 1) | (color_bin[:, :, 2] / 255 == 1)] = 1
    fig, (left_ax, right_ax) = plt.subplots(1, 2, figsize=(12, 5))
    fig.tight_layout()
    left_ax.imshow(color_bin)
    left_ax.set_title('Stacked thresholds', fontsize=20)
    right_ax.imshow(combined_bin, cmap='gray')
    right_ax.set_title('Combined color channels and gradient thresholds', fontsize=20)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    plt.show()
# Threshold the challenge image, then warp it to a top-down view.
test_image_path = (Path(TEST_IMG_PATH)/'challenge03.jpg').as_posix()
test_image_rgb = plt.imread(test_image_path)
undist = undist_img(test_image_rgb, CAMERA_CAL_FILE)
plt.imshow(test_image_rgb)
SX_THRESH = (30, 90)
L_THRESH = (160, 255)
color_bin = select_gradient_color(test_image_rgb, sx_thresh=SX_THRESH, l_thresh=L_THRESH, b_thresh=(200, 255)).astype(np.uint8)
plot_binary_images(color_bin)
# Choose the polygon region for the perspective transform.
# Bug fix: derive img_shape = (width, height) here — this cell previously
# read a notebook-global that is only defined in a *later* cell, so it
# raised NameError when the notebook was run top to bottom.
img_shape = undist.shape[:2][::-1]
w = 450
h = 0
src = np.float32([[265, 680], [583, 460], [701, 460], [1044, 680]])
dst = np.float32([[w, 720], [w, h], [img_shape[0]-w, h], [img_shape[0]-w, 720]])
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
warped = cv2.warpPerspective(undist, M, img_shape)
warped_bin = select_gradient_color(warped, sx_thresh=SX_THRESH, l_thresh=L_THRESH).astype(np.uint8)
plt.show()
plt.imshow(warped_bin)
Manually select the polygon based on straight lane lines
# Undistort a straight-lane image and draw the manually chosen source polygon.
test_image_path = (Path(TEST_IMG_PATH)/'straight_lines1.jpg').as_posix()
test_image_rgb = plt.imread(test_image_path)
undist = undist_img(test_image_rgb, CAMERA_CAL_FILE)
img_shape = undist.shape[:2][::-1]  # (width, height)
print(undist.shape)
# Choose the polygon region to conduct the perspective transform.
w = 450
h = 0
src = np.float32([[265, 680], [583, 460], [701, 460], [1044, 680]])
dst = np.float32([[w, 720], [w, h], [img_shape[0]-w, h], [img_shape[0]-w, 720]])
fig, ax = plt.subplots(1, 1, figsize=(20, 12))
cv2.polylines(undist, [src.astype(np.int32)], isClosed=True, color=(255,0,0), thickness=1);
plt.imshow(undist[undist.shape[0]//2:, :,:])
# Warp to a bird's-eye view; straight lane lines should appear parallel.
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
warped = cv2.warpPerspective(undist, M, img_shape)
plt.imshow(warped)
Perspective Transform for curved lines
# Repeat the perspective transform on a curved-lane image.
test_image_path = (Path(TEST_IMG_PATH)/'test4.jpg').as_posix()
test_image_rgb = plt.imread(test_image_path)
undist = undist_img(test_image_rgb, CAMERA_CAL_FILE)
# Choose the polygon region to conduct the perspective transform.
w = 450
h = 0
src = np.float32([[265, 680], [583, 460], [701, 460], [1044, 680]])
dst = np.float32([[w, 720], [w, h], [img_shape[0]-w, h], [img_shape[0]-w, 720]])
cv2.polylines(undist, [src.astype(np.int32)], 1, (255,0,0), 2);
plt.imshow(undist)
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
warped = cv2.warpPerspective(undist, M, img_shape)
plt.imshow(warped)
# Load a ready-made warped binary example for developing the lane-pixel search.
binary_warped = plt.imread((Path(ROOT_PATH)/'examples/warped_example.png').as_posix())
#test_image_path = '/home/downloads/carnd-p4-advanced-lane-lines/test_images/test3.jpg'
#binary_warped = plt.imread(test_image_path)
binary_warped = cv2.cvtColor(binary_warped, cv2.COLOR_RGB2GRAY)
plt.imshow(binary_warped, cmap='gray')
binary_warped.shape
def find_lane_pixels(binary_warped):
    """Locate left/right lane-line pixels with a sliding-window search.

    Args:
        binary_warped: top-down binary image (nonzero = candidate pixel).

    Returns:
        (leftx, lefty, rightx, righty, out_img): pixel coordinates for each
        line and a visualization image with the search windows drawn.
    """
    # Histogram of the bottom half: column sums peak at the lane bases.
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
    # Visualization image (binary replicated into 3 channels, scaled to 255).
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    # Starting x positions: the histogram peak in each half.
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin int instead.
    midpoint = int(histogram.shape[0]//2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # HYPERPARAMETERS
    nwindows = 9    # number of sliding windows
    margin = 100    # window half-width
    minpix = 50     # min pixels found to recenter the next window
    window_height = int(binary_warped.shape[0]//nwindows)
    # Coordinates of all nonzero pixels in the image.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions, updated for each window.
    leftx_current = leftx_base
    rightx_current = rightx_base
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one, from the bottom of the image up.
    for window in range(nwindows):
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image.
        cv2.rectangle(out_img,(win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0,255,0), 2)
        cv2.rectangle(out_img,(win_xright_low,win_y_low), (win_xright_high,win_y_high), (0,255,0), 2)
        # Nonzero pixels falling inside each window.
        good_left_inds = ((nonzeroy >= win_y_low) &
                          (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) &
                          (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) &
                           (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) &
                           (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Recenter the next window on the mean x when enough pixels hit.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the per-window index arrays.
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Nothing found in any window; leave the (empty) lists as-is.
        pass
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return leftx, lefty, rightx, righty, out_img
def fit_poly(img_shape, leftx, lefty, rightx, righty):
    """Fit second-order polynomials x = f(y) to the left and right lane pixels.

    Returns the two coefficient arrays, the evaluated x values for every
    image row, and the row coordinates used for plotting.
    """
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # One plotting sample per image row, top to bottom.
    ploty = np.linspace(0, img_shape[0]-1, img_shape[0])
    try:
        left_fitx = np.polyval(left_fit, ploty)
        right_fitx = np.polyval(right_fit, ploty)
    except TypeError:
        # Fall back to dummy curves when the fit coefficients are unusable.
        print('The function failed to fit a line!')
        left_fitx = ploty**2 + ploty
        right_fitx = ploty**2 + ploty
    return left_fit, right_fit, left_fitx, right_fitx, ploty
def fit_polynomial(binary_warped):
    """Detect lane pixels, fit polynomials, and colorize them on an output image."""
    leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped)
    left_fit, right_fit, left_fitx, right_fitx, ploty = fit_poly(
        binary_warped.shape, leftx, lefty, rightx, righty)
    # Paint the detected pixels: left lane red, right lane blue.
    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]
    return {
        'img': out_img,
        'left_fitx': left_fitx,
        'right_fitx': right_fitx,
        'left_fit': left_fit,
        'right_fit': right_fit,
        'ploty': ploty,
    }
def visualize_result(img, left_fitx, right_fitx, ploty):
    """Overlay the fitted lane polynomials on the detection image."""
    fig, ax = plt.subplots(1, 1, figsize=(12, 5))
    for fitx in (left_fitx, right_fitx):
        ax.plot(fitx, ploty, color='yellow')
    ax.imshow(img.astype(int))
# Fit the example image and visualize the result.
result = fit_polynomial(binary_warped)
visualize_result(result['img'], result['left_fitx'], result['right_fitx'], result['ploty'])
ploty = result['ploty']
left_fit = result['left_fit']
right_fit = result['right_fit']
left_fitx = result['left_fitx']
right_fitx = result['right_fitx']
# Evaluate the radius of curvature at the bottom of the image (closest to the car).
y_eval = np.max(ploty)
# R = (1 + (2Ay + B)^2)^(3/2) / |2A|, in pixel units.
left_rad = ((1+(2*left_fit[0]*y_eval+left_fit[1])**2)**1.5)/np.absolute(2*left_fit[0])
right_rad = ((1+(2*right_fit[0]*y_eval+right_fit[1])**2)**1.5)/np.absolute(2*right_fit[0])
print(y_eval, left_rad, right_rad)
# Convert to real-world curvature by refitting in meters.
ym_per_pix = 30/720. # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
left_fit_real = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2)
right_fit_real = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2)
left_rad_real = ((1+(2*left_fit_real[0]*y_eval*ym_per_pix+left_fit_real[1])**2)**1.5)/np.absolute(2*left_fit_real[0])
right_rad_real = ((1+(2*right_fit_real[0]*y_eval*ym_per_pix+right_fit_real[1])**2)**1.5)/np.absolute(2*right_fit_real[0])
print(left_rad_real, right_rad_real)
The goals / steps of this project are the following:
from collections import deque
class Lines():
    """Track lane-line detections and polynomial fits across recent frames."""

    def __init__(self, max_len=10):
        # was the line detected in the last iteration?
        self.detected = False
        # x values of the last n fits of the line
        self.recent_xfitted = deque(maxlen=max_len)
        # polynomial coefficients averaged over the last n iterations
        self.best_fit = None
        # polynomial coefficients of the most recent fit
        self.current_fit = None
        # polynomial fits of the last n iterations
        self.recent_fit = deque(maxlen=max_len)
        # radius of curvature of the line in some units
        self.radius_of_curvature = None
        # distance in meters of vehicle center from the line
        self.line_base_pos = None
        # difference in fit coefficients between last and new fits
        self.diffs = np.array([0, 0, 0], dtype='float')
        # x / y values of the detected line pixels (most recent frame)
        self.allx = None
        self.ally = None
        # pixel coordinates from the last n frames
        self.recent_allx = deque(maxlen=max_len)
        self.recent_ally = deque(maxlen=max_len)

    def update(self, leftx, rightx, lefty, righty):
        """Store the latest detected pixel coordinates and keep a history."""
        self.allx = (leftx, rightx)
        self.ally = (lefty, righty)
        self.recent_allx.append(self.allx)
        self.recent_ally.append(self.ally)

    def update_fitx(self, left_fitx, right_fitx):
        """Store the latest fitted x values."""
        self.recent_xfitted.append((left_fitx, right_fitx))

    def update_fit(self, left_fit, right_fit):
        """Store a new fit and refresh the average over recent fits."""
        # Track the coefficient change before overwriting the current fit.
        if self.current_fit is not None:
            self.diffs = (self.current_fit[0] - left_fit,
                          self.current_fit[1] - right_fit)
        # TODO: do sanity check on difference between last and new fits
        self.current_fit = (left_fit, right_fit)
        self.recent_fit.append((left_fit, right_fit))
        self.best_fit = np.mean(np.array(self.recent_fit), axis=0)
class LaneFindingPipeline():
def __init__(self,
             root_path,
             camera_cal_file,
             b_thresh=(190, 255),
             l_thresh=(180, 255),
             nwindows=9,
             margin=100,
             minpix=50,
             ym_per_pix=30/720.,
             xm_per_pix=3.7/720,
             img_size=(1280, 720)):
    """Configure the pipeline.

    Args:
        root_path: project root directory.
        camera_cal_file: pickle with {'mtx', 'dist'} camera calibration.
        b_thresh, l_thresh: LAB b / L channel color thresholds.
        nwindows, margin, minpix: sliding-window search hyperparameters.
        ym_per_pix, xm_per_pix: meters per pixel in y and x.
        img_size: (width, height) of input frames; new backward-compatible
            parameter. Bug fix: this previously read a notebook-global
            ``img_shape`` that may not exist when the class is constructed.
    """
    self.root_path = root_path
    # Load the previously computed camera calibration.
    with open(camera_cal_file, 'rb') as f:
        calibration = pickle.load(f)
    self.mtx = calibration['mtx']
    self.dist = calibration['dist']
    self.b_thresh = b_thresh
    self.l_thresh = l_thresh
    # Hyperparameters for the sliding-window polynomial fit.
    self.nwindows = nwindows
    self.margin = margin
    self.minpix = minpix
    # Meters per pixel in the y and x dimensions.
    self.ym_per_pix = ym_per_pix
    self.xm_per_pix = xm_per_pix
    # Perspective transform and its inverse; src was picked manually from
    # straight-lane images, dst maps it to a vertical rectangle.
    w = 400
    h = 0
    src = np.float32([[265, 680], [583, 460], [701, 460], [1044, 680]])
    dst = np.float32([[w, 720], [w, h], [img_size[0]-w, h], [img_size[0]-w, 720]])
    self.M = cv2.getPerspectiveTransform(src, dst)
    self.Minv = cv2.getPerspectiveTransform(dst, src)
    # Lane-line state shared across frames.
    self.line = Lines()
def undist_img(self, img):
    """Undistort img using the stored camera matrix and distortion coefficients."""
    # (an unused img_size computation was removed)
    return cv2.undistort(img, self.mtx, self.dist, None, self.mtx)
def convert_to_bin(self, img, thresh):
output_bin = np.zeros_like(img)
output_bin[(img >= thresh[0]) & (img <= thresh[1])] = 1
return output_bin
def select_color(self, img):
    """Build a binary lane mask from LAB color thresholds.

    Only the b-channel (yellow) threshold contributes to the returned
    mask; the L-channel threshold is computed but currently unused.
    """
    lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    l_chan = lab[:, :, 0]
    b_chan = lab[:, :, 2]
    # Normalize the lightness channel to the full 0-255 range.
    l_chan = l_chan * 255. / np.max(l_chan)
    l_bin = np.zeros_like(l_chan)
    l_bin[(l_chan > self.l_thresh[0]) & (l_chan <= self.l_thresh[1])] = 1
    # Rescale b only when it has a strong maximum — presumably to avoid
    # amplifying frames without yellow content; TODO confirm.
    if np.max(b_chan) >= 170:
        b_chan = b_chan * 255. / np.max(b_chan)
    b_bin = np.zeros_like(b_chan)
    b_bin[(b_chan > self.b_thresh[0]) & (b_chan <= self.b_thresh[1])] = 1
    combined_bin = np.zeros_like(l_chan)
    combined_bin[b_bin == 1] = 1
    return combined_bin
def select_gradient_color(self, img, sx_thresh=(20, 100), l_thresh=(160, 255), b_thresh = (185, 255)):
    """Combine a LAB b-channel color mask with an x-gradient mask on L.

    Returns a single binary mask where either selection fired.
    """
    lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    # Normalize the lightness channel to the full 0-255 range.
    l_chan = lab[:, :, 0]
    l_chan = l_chan * 255. / np.max(l_chan)
    b_chan = lab[:, :, 2]
    if np.max(b_chan) >= 175:
        b_chan = b_chan * 255. / np.max(b_chan)
    b_bin = np.zeros_like(b_chan)
    b_bin[(b_chan > b_thresh[0]) & (b_chan <= b_thresh[1])] = 1
    # Scaled absolute x-gradient of the lightness channel.
    grad = np.absolute(cv2.Sobel(l_chan, cv2.CV_64F, 1, 0))
    grad_scaled = np.uint8(255 * grad / np.max(grad))
    # Gradient mask, additionally gated by the lightness range.
    sx_bin = np.zeros_like(grad_scaled)
    keep = ((grad_scaled >= sx_thresh[0]) & (grad_scaled <= sx_thresh[1])
            & (l_chan > l_thresh[0]) & (l_chan <= l_thresh[1]))
    sx_bin[keep] = 1
    # Union of the color and gradient selections.
    combined_bin = np.zeros_like(sx_bin)
    combined_bin[(b_bin == 1) | (sx_bin == 1)] = 1
    return combined_bin
def transform_perspective(self, img):
    """Warp img to the bird's-eye view using the precomputed matrix."""
    dsize = (img.shape[1], img.shape[0])  # (width, height) for warpPerspective
    return cv2.warpPerspective(img, self.M, dsize)
def thresh_transform_perspective(self, img):
    """Undistort, threshold, and warp an RGB frame to the bird's-eye view.

    Returns the warped binary image. (Dead commented-out alternative
    selection calls were removed.)
    """
    undist = self.undist_img(img)
    colored_bin = self.select_gradient_color(undist)
    return self.transform_perspective(colored_bin)
def calculate_hist(self, binary_warped):
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
return histogram
def find_lane_pixels(self, binary_warped):
    """Locate left/right lane-line pixels with a sliding-window search.

    Args:
        binary_warped: top-down binary image (nonzero = candidate pixel).

    Returns:
        (leftx, lefty, rightx, righty, out_img): pixel coordinates for each
        line and a visualization image with the search windows drawn.
    """
    # Histogram of the bottom half; its two peaks seed the window search.
    histogram = self.calculate_hist(binary_warped)
    # Visualization image (binary replicated into 3 channels, scaled to 255).
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    # Base x positions: the histogram peak in each half.
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin int instead.
    midpoint = int(histogram.shape[0]//2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Hyperparameters come from the pipeline configuration.
    nwindows = self.nwindows
    margin = self.margin
    minpix = self.minpix
    window_height = int(binary_warped.shape[0]//self.nwindows)
    # Coordinates of all nonzero pixels in the image.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions, updated for each window.
    leftx_current = leftx_base
    rightx_current = rightx_base
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one, from the bottom of the image up.
    for window in range(nwindows):
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image.
        cv2.rectangle(out_img,(win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0,255,0), 2)
        cv2.rectangle(out_img,(win_xright_low,win_y_low), (win_xright_high,win_y_high), (0,255,0), 2)
        # Nonzero pixels falling inside each window.
        good_left_inds = ((nonzeroy >= win_y_low) &
                          (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) &
                          (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) &
                           (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) &
                           (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Recenter the next window on the mean x when enough pixels hit.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the per-window index arrays.
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Nothing found in any window; leave the (empty) lists as-is.
        pass
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return leftx, lefty, rightx, righty, out_img
def search_around_poly(self, binary_warped):
    """Search for lane pixels in a corridor around the previous best fit,
    then refit the polynomials via self.fit_poly.

    Assumes self.line.best_fit holds a (left_fit, right_fit) pair from a
    previous frame.
    """
    # Corridor half-width around each previous polynomial.
    margin = self.margin
    left_fit, right_fit = self.line.best_fit
    # Coordinates of all activated pixels.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Keep pixels whose x lies within +/- margin of each fitted curve
    # evaluated at the pixel's y.
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
                    left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
                    left_fit[1]*nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
                    right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
                    right_fit[1]*nonzeroy + right_fit[2] + margin)))
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit new polynomials (updates self.line state).
    self.fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
def fit_poly(self, img_shape, leftx, lefty, rightx, righty):
    """Fit second-order polynomials to the pooled lane pixels and update
    self.line with the fits and the fitted x values over the image height.

    img_shape: (height, width) of the warped binary image.
    leftx/lefty/rightx/righty: pixel coordinates of the current detection;
    pixels from the recent n iterations (self.line.recent_allx/recent_ally)
    are pooled for a smoother fit. No-op when either side is None.
    """
    if leftx is None or rightx is None:
        return
    self.line.update(leftx, rightx, lefty, righty)
    # Pool pixel coordinates over the recent iterations kept by the Line object.
    all_leftx = np.concatenate([x[0] for x in self.line.recent_allx])
    all_rightx = np.concatenate([x[1] for x in self.line.recent_allx])
    all_lefty = np.concatenate([y[0] for y in self.line.recent_ally])
    all_righty = np.concatenate([y[1] for y in self.line.recent_ally])
    # One y sample per image row, for evaluating the fitted polynomials.
    ploty = np.linspace(0, img_shape[0] - 1, img_shape[0])
    try:
        left_fit = np.polyfit(all_lefty, all_leftx, 2)
        right_fit = np.polyfit(all_righty, all_rightx, 2)
        self.line.update_fit(left_fit, right_fit)
    except TypeError:
        # polyfit received empty/invalid pixel arrays: keep old state, mark lost.
        self.line.detected = False
        return
    # BUGFIX: the original assigned left_fit/right_fit from best_fit AFTER
    # already computing left_fitx/right_fitx, so the smoothed best fit was
    # never used. Evaluate the fitted x values from best_fit when available.
    if self.line.best_fit is not None:
        left_fit, right_fit = self.line.best_fit
    left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
    # Publish the fitted x values and mark the lane as detected.
    self.line.update_fitx(left_fitx, right_fitx)
    self.line.detected = True
def get_ploty(self, num_pts):
    """Return evenly spaced float y-coordinates 0 .. num_pts-1, one per row."""
    return np.arange(num_pts, dtype=np.float64)
def fit_polynomial(self, binary_warped):
    """Fit lane polynomials on the warped binary image.

    On the first frame (no current fit yet) do a full sliding-window
    search; afterwards restrict the search to a band around the
    previous polynomial.
    """
    if self.line.current_fit is not None:
        # A previous fit exists: search only near it.
        self.search_around_poly(binary_warped)
        return
    # Cold start: locate pixels with the sliding-window search, then fit.
    leftx, lefty, rightx, righty, _ = self.find_lane_pixels(binary_warped)
    self.fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
def visualize_fit_poly(self, img):
    """Plot the most recent fitted lane polynomials over img (shown grayscale)."""
    left_fitx, right_fitx = self.line.recent_xfitted[-1]
    ploty = self.get_ploty(len(left_fitx))
    fig, ax = plt.subplots(1, 1, figsize=(12, 5))
    # Draw both fitted curves in red on top of the (binary) image.
    for fitx in (left_fitx, right_fitx):
        ax.plot(fitx, ploty, color='red')
    ax.imshow(img.astype(int), cmap='gray')
def draw_lane_area(self, img, warped, undistorted=False):
    """Project the detected lane polygon back onto the road image.

    img: original RGB frame; warped: birds-eye binary used for the fit;
    undistorted: pass True when img is already undistorted.
    Returns the frame with the lane area blended in green.
    """
    # Keep the original `== False` comparison: callers may pass non-bool values.
    undist = self.undist_img(img) if undistorted == False else img
    left_fitx, right_fitx = self.line.recent_xfitted[-1]
    height = img.shape[0]
    ploty = np.linspace(0, height - 1, height)
    # Blank 3-channel canvas in warped space to draw the lane polygon on.
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Left boundary top->bottom plus right boundary bottom->top forms a
    # closed polygon in the format cv2.fillPoly expects.
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
    # Unwarp the polygon into camera view with the inverse perspective matrix.
    newwarp = cv2.warpPerspective(color_warp, self.Minv,
                                  (img.shape[1], img.shape[0]))
    # Blend the lane overlay with the (undistorted) original frame.
    return cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
def process_with_pipeline(self, img):
    """Run one camera frame through the full lane-finding pipeline.

    Thresholds and perspective-warps the frame, fits the lane
    polynomials, and returns the frame annotated with the lane area.
    """
    birdseye = self.thresh_transform_perspective(img)
    self.fit_polynomial(birdseye)
    return self.draw_lane_area(img, birdseye)
def cal_curvature_center(self, img):
    """Compute lane curvature radii and the car's offset from lane center.

    Stores (left_radius_m, right_radius_m) in self.line.radius_of_curvature
    and the signed offset in meters (positive = car right of lane center)
    in self.center_dist.
    """
    height = img.shape[0]
    width = img.shape[1]
    ploty = self.get_ploty(height)
    left_fit, right_fit = self.line.current_fit
    left_fitx, right_fitx = self.line.recent_xfitted[-1]
    # Evaluate curvature at the bottom of the image (closest to the car).
    y_eval = np.max(ploty)
    ym_per_pix = self.ym_per_pix  # meters per pixel in y dimension
    xm_per_pix = self.xm_per_pix  # meters per pixel in x dimension
    # Refit in world (meter) space before applying the curvature formula
    # R = (1 + (2Ay + B)^2)^1.5 / |2A|.
    left_fit_real = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2)
    right_fit_real = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2)
    left_rad_real = ((1+(2*left_fit_real[0]*y_eval*ym_per_pix+left_fit_real[1])**2)**1.5)/np.absolute(2*left_fit_real[0])
    right_rad_real = ((1+(2*right_fit_real[0]*y_eval*ym_per_pix+right_fit_real[1])**2)**1.5)/np.absolute(2*right_fit_real[0])
    self.line.radius_of_curvature = (left_rad_real, right_rad_real)
    # Camera assumed centered on the car: car x-position = image midpoint.
    car_pos = width/2
    # BUGFIX: the original evaluated the fits at undefined name `h`
    # (NameError); use the image height (bottom of the frame) as intended.
    lane_leftx = left_fit[0]*height**2+left_fit[1]*height+left_fit[2]
    lane_rightx = right_fit[0]*height**2+right_fit[1]*height+right_fit[2]
    lane_center_pos = (lane_leftx + lane_rightx)/2
    self.center_dist = (car_pos - lane_center_pos) * self.xm_per_pix
# --- Sanity check: run the pipeline on a single challenge image ---
test_image_path = '/home/downloads/carnd-p4-advanced-lane-lines/test_images/challenge01.jpg'
test_image_rgb = plt.imread(test_image_path)
# Fresh pipeline instance; loads the saved camera calibration from disk.
lfp = LaneFindingPipeline(ROOT_PATH, CAMERA_CAL_FILE)
plt.imshow(test_image_rgb)
# Threshold + perspective-warp, then fit lane polynomials on the warped binary.
warped = lfp.thresh_transform_perspective(test_image_rgb)
lfp.fit_polynomial(warped)
lfp.visualize_fit_poly(warped)
# Curvature radii and center offset computed from the fitted lanes.
lfp.cal_curvature_center(warped)
print(lfp.line.radius_of_curvature)
print(lfp.center_dist)
# End-to-end pipeline: annotated lane area drawn back onto the original frame.
plt.imshow(lfp.process_with_pipeline(test_image_rgb))
# --- Second sanity check on a standard test image, with histogram view ---
test_image_path = '/home/downloads/carnd-p4-advanced-lane-lines/test_images/test4.jpg'
test_image_rgb = plt.imread(test_image_path)
lfp = LaneFindingPipeline(ROOT_PATH, CAMERA_CAL_FILE)
warped = lfp.thresh_transform_perspective(test_image_rgb)
plt.imshow(test_image_rgb)
# Column histogram of the warped binary: peaks mark lane-line base positions.
histogram = lfp.calculate_hist(warped)
plt.plot(histogram)
# BUGFIX: fit_polynomial returns None; the original captured that None and
# passed it as draw_lane_area's `undistorted` flag, which silently skipped
# the undistortion step. Let the flag take its default (False) instead.
lfp.fit_polynomial(warped)
lfp.visualize_fit_poly(warped)
plt.imshow(lfp.draw_lane_area(test_image_rgb, warped))
# --- Batch check: run the pipeline over every test image and plot results ---
for img_path in Path(TEST_IMG_PATH).glob('*jpg'):
    print(img_path)
    test_img_rgb = plt.imread(img_path)
    # Fresh pipeline per image so smoothing state can't leak between frames.
    lfp = LaneFindingPipeline(ROOT_PATH, CAMERA_CAL_FILE)
    warped = lfp.thresh_transform_perspective(test_img_rgb)
    lfp.fit_polynomial(warped)
    fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 12))
    ax1.imshow(test_img_rgb)
    # Annotated lane area (the original also drew `warped` here first, but it
    # was immediately covered by this image, so that call is dropped).
    ax2.imshow(lfp.draw_lane_area(test_img_rgb, warped))
    if lfp.line.detected:
        left_fitx, right_fitx = lfp.line.recent_xfitted[-1]
        # BUGFIX: the original computed ploty from left_fitx BEFORE it was
        # assigned (NameError / stale value from a previous cell on the first
        # image); compute it here from the current fit instead.
        ploty = lfp.get_ploty(len(left_fitx))
        # Plot the left and right polynomials on the lane lines.
        ax3.plot(left_fitx, ploty, color='red')
        ax3.plot(right_fitx, ploty, color='red')
    ax3.imshow(warped.astype(int), cmap='gray')
    plt.show()
# Import everything needed to edit/save/watch video clips.
from moviepy.editor import VideoFileClip
from IPython.display import HTML
VIDEO_INPUT_PATH = Path(ROOT_PATH)/'test_videos'
VIDEO_OUTPUT_PATH = Path(ROOT_PATH)/'test_videos_result'
# --- Project video ---
# A fresh pipeline per clip keeps per-frame smoothing state independent.
lfp = LaneFindingPipeline(ROOT_PATH, CAMERA_CAL_FILE)
clip1 = VideoFileClip((VIDEO_INPUT_PATH/'project_video.mp4').as_posix())
white_clip = clip1.fl_image(lfp.process_with_pipeline) #NOTE: this function expects color images!!
%time white_clip.write_videofile((VIDEO_OUTPUT_PATH/'project_video_result.mp4').as_posix(), audio=False, progress_bar=False)
# --- Challenge video ---
lfp = LaneFindingPipeline(ROOT_PATH, CAMERA_CAL_FILE)
clip2 = VideoFileClip((VIDEO_INPUT_PATH/'challenge_video.mp4').as_posix())
white_clip = clip2.fl_image(lfp.process_with_pipeline) #NOTE: this function expects color images!!
%time white_clip.write_videofile((VIDEO_OUTPUT_PATH/'challenge_video_result.mp4').as_posix(), audio=False, progress_bar=False)
# --- Harder challenge video ---
lfp = LaneFindingPipeline(ROOT_PATH, CAMERA_CAL_FILE)
clip3 = VideoFileClip((VIDEO_INPUT_PATH/'harder_challenge_video.mp4').as_posix())
white_clip = clip3.fl_image(lfp.process_with_pipeline) #NOTE: this function expects color images!!
%time white_clip.write_videofile((VIDEO_OUTPUT_PATH/'harder_challenge_video_result.mp4').as_posix(), audio=False, progress_bar=False)